From: Keir Fraser Date: Thu, 8 Jul 2010 08:52:51 +0000 (+0100) Subject: xen: make the shadow allocation hypercalls include the p2m memory X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~11827 X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/%22/%22http:/www.example.com/cgi/%22?a=commitdiff_plain;h=6dddb7b172168153374eb7ebb4daa42e407fc3c4;p=xen.git xen: make the shadow allocation hypercalls include the p2m memory in the total shadow allocation. This makes the effect of allocation changes consistent regardless of p2m activity on boot. Signed-off-by: Tim Deegan --- diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c index 0058461609..60f6a3c975 100644 --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -334,7 +334,8 @@ static void hap_free_p2m_page(struct domain *d, struct page_info *pg) static unsigned int hap_get_allocation(struct domain *d) { - unsigned int pg = d->arch.paging.hap.total_pages; + unsigned int pg = d->arch.paging.hap.total_pages + + d->arch.paging.hap.p2m_pages; return ((pg >> (20 - PAGE_SHIFT)) + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 
1 : 0)); @@ -349,6 +350,11 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted) ASSERT(hap_locked_by_me(d)); + if ( pages < d->arch.paging.hap.p2m_pages ) + pages = 0; + else + pages -= d->arch.paging.hap.p2m_pages; + while ( d->arch.paging.hap.total_pages != pages ) { if ( d->arch.paging.hap.total_pages < pages ) @@ -367,6 +373,11 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted) else if ( d->arch.paging.hap.total_pages > pages ) { /* Need to return memory to domheap */ + if ( page_list_empty(&d->arch.paging.hap.freelist) ) + { + HAP_PRINTK("failed to free enough hap pages.\n"); + return -ENOMEM; + } pg = page_list_remove_head(&d->arch.paging.hap.freelist); ASSERT(pg); d->arch.paging.hap.free_pages--; diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index a42de64725..ca756a4818 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -1817,14 +1817,24 @@ static unsigned int sh_set_allocation(struct domain *d, unsigned int j, order = shadow_max_order(d); ASSERT(shadow_locked_by_me(d)); - - /* Don't allocate less than the minimum acceptable, plus one page per - * megabyte of RAM (for the p2m table) */ - lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256); - if ( pages > 0 && pages < lower_bound ) - pages = lower_bound; - /* Round up to largest block size */ - pages = (pages + ((1<<order)-1)) & ~((1<<order)-1); + + if ( pages > 0 ) + { + /* Check for minimum value.
*/ + if ( pages < d->arch.paging.shadow.p2m_pages ) + pages = 0; + else + pages -= d->arch.paging.shadow.p2m_pages; + + /* Don't allocate less than the minimum acceptable, plus one page per + * megabyte of RAM (for the p2m table) */ + lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256); + if ( pages < lower_bound ) + pages = lower_bound; + + /* Round up to largest block size */ + pages = (pages + ((1<<order)-1)) & ~((1<<order)-1); + } SHADOW_PRINTK("current %i target %i\n", d->arch.paging.shadow.total_pages, pages); @@ -1884,7 +1894,8 @@ static unsigned int sh_set_allocation(struct domain *d, /* Return the size of the shadow pool, rounded up to the nearest MB */ static unsigned int shadow_get_allocation(struct domain *d) { - unsigned int pg = d->arch.paging.shadow.total_pages; + unsigned int pg = d->arch.paging.shadow.total_pages + + d->arch.paging.shadow.p2m_pages; return ((pg >> (20 - PAGE_SHIFT)) + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0)); }